Skip to main content

AI Models

This section documents the AI model integrations available in the codebase.

Table of Contents

Anthropic Claude

File: models/ai/anthropic/claude.py

The Claude module provides a wrapper for interacting with Anthropic's Claude AI models, offering capabilities for text generation, chat conversations, and specialized functions like product matching.

Initialization

from models.ai.anthropic.claude import ClaudeAI

# Initialize with API key from environment variable
claude = ClaudeAI() # Uses CLAUDE_API_KEY from environment

# Or specify API key directly
claude = ClaudeAI(api_key="your_api_key_here")

# Specify a different model
claude.set_model("claude-3-opus")

Available models include:

  • claude-3-opus
  • claude-3-sonnet
  • claude-3.5-sonnet
  • claude-3-haiku

Basic Text Generation

response = claude.generate_response(
    prompt="Tell me a story about a magic backpack",
    max_tokens=2000,
    temperature=0.7
)
print(response["text"])

Chat Conversations

messages = [
    {"role": "user", "content": "Hello, Claude!"},
    {"role": "assistant", "content": "Hello! How can I help you today?"},
    {"role": "user", "content": "Can you explain quantum computing?"}
]
response = claude.generate_chat(messages)
print(response["text"])

Product Matching

# Match products with default parameters
products = claude.get_matched_products(
    prompt="iPhone 15 Pro Max",
    full=False  # Returns only the matches without metadata
)
print(products)

# Match with entity types
products = claude.get_matched_products(
    prompt="iPhone 15 Pro Max",
    etypes=[{"etid": "phone", "name": "Smartphone"}],
    full=True  # Includes metadata in response
)
print(products)

Product Specifications

# Get detailed product specifications
specs = claude.get_product_specs(
    prompt="MacBook Pro M2 16-inch",
    full=False
)
print(specs)

Vehicle Specifications

# Get vehicle specifications
vehicle_specs = claude.get_vehicle_specs(
    prompt="2022 Toyota Camry Hybrid",
    full=True
)
print(vehicle_specs)

Motorbike Specifications

# Get motorbike specifications
bike_specs = claude.get_motorbike_specs(
    prompt="2023 Honda CBR1000RR-R Fireblade",
    full=True
)
print(bike_specs)

Product Keywords Generation

# Generate keywords for a product
keywords = claude.get_product_keywords(
    prompt="iPhone 15 Pro Max",
    tag_count=15,
    languages=["en", "es"],  # Generate tags in English and Spanish
    industry="e-commerce"
)
print(keywords)

OpenAI ChatGPT

File: models/ai/openai/chatgpt.py

The ChatGPT module provides access to OpenAI's GPT models with capabilities for text generation, classification, and specialized functions.

Initialization

from models.ai.openai.chatgpt import ChatGPT

# Initialize with API key from environment variable
chat_gpt = ChatGPT() # Uses CHAT_GPT_API_KEY from environment

Text Generation

# Generate a response with default parameters
response = chat_gpt.generate_response(
    prompt="Tell me a joke."
)
print(response)

# Generate with custom parameters
response = chat_gpt.generate_response(
    prompt="Write a story about a haunted house",
    model="gpt-4o",  # Use GPT-4o model
    temperature=0.8,
    max_tokens=500,
    top_p=0.95
)
print(response)

Classification

# Classify a product description into categories
product_description = "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France."
categories = ["Technology", "Food", "Clothing", "Places", "Books", "Furniture"]
categories_with_confidence = chat_gpt.classify_with_description(product_description, categories)
print(categories_with_confidence)

Product Specifications

# Get detailed product specifications
specs = chat_gpt.get_product_specs(
    prompt="MacBook Pro M2 16-inch",
    model="gpt-4o",
    max_tokens=2000
)
print(specs)

Product Matching

# Find products matching a query
products = chat_gpt.get_matched_products(
    prompt="iPhone 15 Pro Max",
    etypes=[{"name": "phones", "etid_root": 406}],
    model="gpt-4o-mini"
)
print(products)

# Get detailed product specifications
product_specs = chat_gpt.get_matched_product_specifications(
    prompt="Samsung Galaxy S24 Ultra",
    model="gpt-4o-mini"
)
print(product_specs)

Entity Keywords and Metadata

# Get metadata for an entity
entity = {"name": "iPhone 15 Pro Max", "description": "Apple's flagship smartphone"}
metadata = chat_gpt.get_entity_keywords_metadata(entity)
print(metadata)

# Get accolades for an entity
accolades = chat_gpt.get_entity_accolades_metadata(entity)
print(accolades)

Image Analysis

# Describe an image
description = chat_gpt.get_image_description("https://example.com/image.jpg")
print(description)

# Extract colors from an image
colors = chat_gpt.extract_image_colors("https://example.com/image.jpg")
print(colors)

# Find basic color matches from a marketing color name
color_matches = chat_gpt.get_basic_color_matches_from_marketing_color("Midnight Green")
print(color_matches)

Google Gemini

File: models/google_cloud/ai/gemini.py

The Gemini module provides access to Google's Gemini family of multimodal large language models.

Initialization

import google.generativeai as genai
from models.google_cloud.ai.gemini import GenerativeModel

# Configure with API key
genai.configure(api_key='YOUR_API_KEY')

# Initialize model with default model (gemini-pro)
model = GenerativeModel()

# Or specify a different model
model = GenerativeModel('gemini-pro-vision') # For multimodal capabilities

Text Generation

# Generate text content
result = model.generate_content('Tell me a story about a magic backpack')
print(result.text)

# Generate with parameters
result = model.generate_content(
    'Explain quantum computing',
    generation_config={
        'temperature': 0.8,
        'max_output_tokens': 500,
    }
)
print(result.text)

Multimodal Inputs

import PIL.Image

# Load an image
image = PIL.Image.open('image.jpg')

# Generate content from text and image
result = model.generate_content([
    "What's shown in this image?",
    image
])
print(result.text)

Streaming Responses

# Stream response as it's generated
response = model.generate_content('Write a long story about space travel', stream=True)
for chunk in response:
    print(chunk.text, end='')

Chat Conversations

# Start a chat session
chat = model.start_chat()

# Send messages and get responses
response = chat.send_message("Hi, I have some questions for you.")
print(response.text)

response = chat.send_message("What is the meaning of life?")
print(response.text)

# Access chat history
print(chat.history)

Function Calling

# Define functions
def get_weather(location):
    return {"location": location, "temperature": "22°C", "condition": "Sunny"}

# Create a function library
tools = {
    "get_weather": {
        "function": get_weather,
        "description": "Get the current weather for a location",
        "parameters": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            }
        }
    }
}

# Start a chat with function calling enabled
chat = model.start_chat(enable_automatic_function_calling=True)

# Send a message that might trigger a function call
response = chat.send_message("What's the weather like in San Francisco?", tools=tools)
print(response.text)

Token Counting

# Count tokens in content
token_count = model.count_tokens("This is a sample text to count tokens.")
print(f"Token count: {token_count.total_tokens}")

# Count tokens for a conversation
messages = [
    {"role": "user", "content": "Hello"},
    {"role": "model", "content": "Hi there! How can I help you today?"},
    {"role": "user", "content": "Tell me about Paris"}
]
token_count = model.count_tokens(messages)
print(f"Conversation token count: {token_count.total_tokens}")

Back to Main Index